extern void generic_identify(struct cpuinfo_x86 * c);
extern void early_intel_workaround(struct cpuinfo_x86 *c);
-
+extern void intel_cpuid_faulting_flip(unsigned int enable);
struct movsl_mask movsl_mask __read_mostly;
#endif
+/*
+ * Detect hardware support for CPUID faulting.
+ *
+ * Returns non-zero iff the read of MSR_INTEL_PLATFORM_INFO succeeds
+ * (rdmsr_safe, so CPUs lacking the MSR simply report "unsupported"
+ * instead of faulting) and bit 31 of the low 32 bits is set.
+ * NOTE(review): bit 31 == "CPUID faulting supported" is assumed from
+ * the MSR layout — confirm against the Intel SDM, vol. 4.
+ */
+static unsigned int probe_intel_cpuid_faulting(void)
+{
+ uint32_t x;
+ return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) && (x & (1u<<31));
+}
+
+/* Per-CPU cache of the current faulting state, to skip redundant MSR writes. */
+static DEFINE_PER_CPU(bool_t, cpuid_faulting_enabled);
+/*
+ * Enable or disable CPUID faulting on the current CPU.
+ *
+ * No-op when the boot CPU did not detect CPUID-faulting support, or when
+ * this CPU is already in the requested state (per-CPU cache above).
+ * Otherwise performs a read-modify-write of bit 0 of
+ * MSR_INTEL_MISC_FEATURES_ENABLES and records the new state.
+ */
+void set_cpuid_faulting(bool_t enable)
+{
+ uint32_t hi, lo;
+
+ if (!cpu_has_cpuid_faulting ||
+ this_cpu(cpuid_faulting_enabled) == enable )
+ return;
+
+ rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+ lo &= ~1; /* bit 0: CPUID-faulting enable */
+ if (enable)
+ lo |= 1;
+ wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
+
+ this_cpu(cpuid_faulting_enabled) = enable;
+}
+
/*
* opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
* For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
detect_ht(c);
}
- set_cpuidmask(c);
+ if (smp_processor_id() == 0) {
+ if (probe_intel_cpuid_faulting())
+ set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ } else if (boot_cpu_has(X86_FEATURE_CPUID_FAULTING)) {
+ BUG_ON(!probe_intel_cpuid_faulting());
+ set_bit(X86_FEATURE_CPUID_FAULTING, c->x86_capability);
+ }
+
+ if (!cpu_has_cpuid_faulting)
+ set_cpuidmask(c);
+
+ BUG_ON(cpu_has(c, X86_FEATURE_CPUID_FAULTING) !=
+ boot_cpu_has(X86_FEATURE_CPUID_FAULTING));
/* Work around errata */
Intel_errata_workarounds(c);
load_LDT(next);
load_segments(next);
}
+
+ set_cpuid_faulting(!is_hvm_vcpu(next) &&
+ (next->domain->domain_id != 0));
}
context_saved(prev);
twobyte_opcode:
/*
- * All 2 and 3 byte opcodes, except RDTSC (0x31) and RDTSCP (0x1,0xF9)
- * are executable only from guest kernel mode (virtual ring 0).
+ * All 2 and 3 byte opcodes, except RDTSC (0x31), RDTSCP (0x1,0xF9),
+ * and CPUID (0xa2), are executable only from guest kernel mode
+ * (virtual ring 0).
*/
opcode = insn_fetch(u8, code_base, eip, code_limit);
- if ( !guest_kernel_mode(v, regs) && (opcode != 0x1) && (opcode != 0x31) )
+ if ( !guest_kernel_mode(v, regs) &&
+ (opcode != 0x1) && (opcode != 0x31) && (opcode != 0xa2) )
goto fail;
if ( lock && (opcode & ~3) != 0x20 )
}
break;
+ case 0xa2: /* CPUID */
+ pv_cpuid(regs);
+ break;
+
default:
goto fail;
}
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_TSC_RELIABLE (3*32+12) /* TSC is known to be reliable */
#define X86_FEATURE_XTOPOLOGY (3*32+13) /* cpu topology enum extensions */
+#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
#define cpu_has_vmx boot_cpu_has(X86_FEATURE_VMXE)
+#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
+
#endif /* __ASM_I386_CPUFEATURE_H */
/*
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
-/* MSRs for Intel cpuid feature mask */
-#define MSR_INTEL_CPUID_FEATURE_MASK 0x00000478
-#define MSR_INTEL_CPUID1_FEATURE_MASK 0x00000130
-#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
-
/* MSRs & bits used for VMX enabling */
#define MSR_IA32_VMX_BASIC 0x480
#define MSR_IA32_VMX_PINBASED_CTLS 0x481
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+/* Intel cpuid spoofing MSRs */
+#define MSR_INTEL_CPUID_FEATURE_MASK 0x00000478
+#define MSR_INTEL_CPUID1_FEATURE_MASK 0x00000130
+#define MSR_INTEL_CPUID80000001_FEATURE_MASK 0x00000131
+
+/* Intel cpuid faulting MSRs */
+#define MSR_INTEL_PLATFORM_INFO 0x000000ce
+#define MSR_INTEL_MISC_FEATURES_ENABLES 0x00000140
+
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
#define current_cpu_data boot_cpu_data
#endif
+extern void set_cpuid_faulting(bool_t enable);
+
extern u64 host_pat;
extern bool_t opt_cpu_info;